and consumes 25% of the per-cpu area.
Add CONFIG_X86_NO_TSS to exclude all code that references the TSS.
Based on a patch by Jan Beulich <JBeulich@novell.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
help
This option will compile a kernel compatible with Xen hypervisor
+config X86_NO_TSS
+ bool
+ depends on X86_64_XEN
+ default y
+
#
# Define implied options from the CPU selection here
#
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
DEFINE(pbe_next, offsetof(struct pbe, next));
+#ifndef CONFIG_X86_NO_TSS
BLANK();
DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
+#endif
return 0;
}
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
+
+#ifndef CONFIG_X86_NO_TSS
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+#endif
#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
kprobe_flush_task(me);
if (me->thread.io_bitmap_ptr) {
+#ifndef CONFIG_X86_NO_TSS
struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+#endif
+#ifdef CONFIG_XEN
+ static physdev_op_t iobmp_op = {
+ .cmd = PHYSDEVOP_SET_IOBITMAP
+ };
+#endif
kfree(t->io_bitmap_ptr);
t->io_bitmap_ptr = NULL;
/*
* Careful, clear this in the TSS too:
*/
+#ifndef CONFIG_X86_NO_TSS
memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
- t->io_bitmap_max = 0;
put_cpu();
+#endif
+#ifdef CONFIG_XEN
+ HYPERVISOR_physdev_op(&iobmp_op);
+#endif
+ t->io_bitmap_max = 0;
}
}
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
int cpu = smp_processor_id();
+#ifndef CONFIG_X86_NO_TSS
struct tss_struct *tss = &per_cpu(init_tss, cpu);
+#endif
physdev_op_t iopl_op, iobmp_op;
multicall_entry_t _mcl[8], *mcl = _mcl;
/*
* Reload esp0, LDT and the page table pointer:
*/
- tss->rsp0 = next->rsp0;
mcl->op = __HYPERVISOR_stack_switch;
mcl->args[0] = __KERNEL_DS;
- mcl->args[1] = tss->rsp0;
+ mcl->args[1] = next->rsp0;
mcl++;
/*
pda->irqstackptr += IRQSTACKSIZE-64;
}
+#ifndef CONFIG_X86_NO_TSS
char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));
+#endif
/* May not be marked __init: used by software suspend */
void syscall_init(void)
void __cpuinit cpu_init (void)
{
int cpu = stack_smp_processor_id();
+#ifndef CONFIG_X86_NO_TSS
struct tss_struct *t = &per_cpu(init_tss, cpu);
unsigned long v;
char *estacks = NULL;
+ unsigned i;
+#endif
struct task_struct *me;
- int i;
/* CPU 0 is initialised in head64.c */
if (cpu != 0) {
pda_init(cpu);
zap_low_mappings(cpu);
- } else
+ }
+#ifndef CONFIG_X86_NO_TSS
+ else
estacks = boot_exception_stacks;
+#endif
me = current;
check_efer();
+#ifndef CONFIG_X86_NO_TSS
/*
* set up and load the per-CPU TSS
*/
*/
for (i = 0; i <= IO_BITMAP_LONGS; i++)
t->io_bitmap[i] = ~0UL;
+#endif
atomic_inc(&init_mm.mm_count);
me->active_mm = &init_mm;
BUG();
enter_lazy_tlb(&init_mm, me);
-#ifndef CONFIG_XEN
+#ifndef CONFIG_X86_NO_TSS
set_tss_desc(cpu, t);
+#endif
+#ifndef CONFIG_XEN
load_TR_desc();
#endif
load_LDT(&init_mm.context);
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
unsigned *usedp, const char **idp)
{
+#ifndef CONFIG_X86_NO_TSS
static char ids[][8] = {
[DEBUG_STACK - 1] = "#DB",
[NMI_STACK - 1] = "NMI",
}
#endif
}
+#endif
return NULL;
}
#ifdef __x86_64__
cpu_pda(cpu)->pcurrent = idle;
cpu_pda(cpu)->cpunumber = cpu;
- per_cpu(init_tss,cpu).rsp0 = idle->thread.rsp;
clear_ti_thread_flag(idle->thread_info, TIF_FORK);
#endif
memcpy(ptr, &d, 16);
}
+#ifndef CONFIG_X86_NO_TSS
static inline void set_tss_desc(unsigned cpu, void *addr)
{
/*
(unsigned long)addr, DESC_TSS,
IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
}
+#endif
static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
{
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#ifndef CONFIG_X86_NO_TSS
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+#endif
#define INVALID_IO_BITMAP_OFFSET 0x8000
struct i387_fxsave_struct {
struct i387_fxsave_struct fxsave;
};
+#ifndef CONFIG_X86_NO_TSS
struct tss_struct {
u32 reserved1;
u64 rsp0;
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;
-extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct,init_tss);
+#endif
+
+extern struct cpuinfo_x86 boot_cpu_data;
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
+#ifndef CONFIG_X86_NO_TSS
#define INIT_TSS { \
.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
+#endif
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }